@InProceedings{CordeiroCarn:2020:HoTrYo,
               author = "Cordeiro, Filipe Rolim and Carneiro, Gustavo",
          affiliation = "{Universidade Federal Rural de Pernambuco} and {University of 
                         Adelaide}",
                title = "A Survey on Deep Learning with Noisy Labels: How to train your 
                         model when you cannot trust on the annotations?",
            booktitle = "Proceedings...",
                 year = "2020",
               editor = "Musse, Soraia Raupp and Cesar Junior, Roberto Marcondes and 
                         Pelechano, Nuria and Wang, Zhangyang (Atlas)",
         organization = "Conference on Graphics, Patterns and Images, 33. (SIBGRAPI)",
            publisher = "IEEE Computer Society",
              address = "Los Alamitos",
             keywords = "noisy labels, deep learning.",
              abstract = "Noisy labels are commonly present in data sets automatically 
                          collected from the internet, mislabeled by non-specialist 
                          annotators, or even by specialists in challenging tasks, such 
                          as in the medical field. Although deep learning models have 
                          shown significant improvements in different domains, an open 
                          issue is their ability to memorize noisy labels during 
                          training, which reduces their generalization potential. As 
                          deep learning models depend on correctly labeled data sets and 
                          label correctness is difficult to guarantee, it is crucial to 
                          account for noisy labels when training deep learning models. 
                          Several approaches have been proposed in the literature to 
                          improve the training of deep learning models in the presence 
                          of noisy labels. This paper presents a survey of the main 
                          techniques in the literature, in which we classify the 
                          algorithms into the following groups: robust losses, sample 
                          weighting, sample selection, meta-learning, and combined 
                          approaches. We also present the commonly used experimental 
                          setups, data sets, and results of the state-of-the-art 
                          models.",
  conference-location = "Porto de Galinhas (virtual)",
      conference-year = "7-10 Nov. 2020",
                  doi = "10.1109/SIBGRAPI51738.2020.00010",
                  url = "http://dx.doi.org/10.1109/SIBGRAPI51738.2020.00010",
             language = "en",
                  ibi = "8JMKD3MGPEW34M/43BHB8L",
                  url = "http://urlib.net/ibi/8JMKD3MGPEW34M/43BHB8L",
           targetfile = "Tutorial_ID_4_SIBGRAPI_2020_camara_ready_v2 copy.pdf",
        urlaccessdate = "2024, May 03"
}
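
The abstract groups noise-robust training methods into robust losses, sample weighting, sample selection, meta-learning, and combined approaches. As an illustration of the first group only, the sketch below shows a generalized cross-entropy style loss, L_q = (1 - p_y^q) / q, which interpolates between standard cross-entropy and the more noise-tolerant mean absolute error. It assumes PyTorch and is not code from the surveyed paper.

import torch
import torch.nn.functional as F

def generalized_cross_entropy(logits: torch.Tensor,
                              targets: torch.Tensor,
                              q: float = 0.7) -> torch.Tensor:
    """Noise-robust loss sketch: L_q = (1 - p_y^q) / q.

    logits: (N, C) raw class scores; targets: (N,) integer labels.
    q -> 0 recovers cross-entropy; q = 1 gives a (scaled) MAE-like loss.
    """
    probs = F.softmax(logits, dim=1)                        # class probabilities, shape (N, C)
    p_y = probs.gather(1, targets.unsqueeze(1)).squeeze(1)  # probability assigned to the given label
    return ((1.0 - p_y.pow(q)) / q).mean()                  # bounded loss, less sensitive to mislabeled samples

# Usage with random tensors standing in for a real (noisy-label) data set:
logits = torch.randn(8, 10, requires_grad=True)
labels = torch.randint(0, 10, (8,))
loss = generalized_cross_entropy(logits, labels)
loss.backward()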

